early_stopping_cb = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5)
window_size = 168 # 7 days
X_train, y_train = create_sequences(train, window_size)
X_test, y_test = create_sequences(test, window_size)
print(f'X_train shape: {X_train.shape}, y_train shape: {y_train.shape}')
print(f'X_test shape: {X_test.shape}, y_test shape: {y_test.shape}')
print(f'LSTM input shape: {X_train.shape[1], X_train.shape[2]}')
input_shape = (X_train.shape[1], X_train.shape[2])
lstm_units = 100
dropout = 0.2
output_units = 1
model_2 = Sequential()
model_2.add(LSTM(lstm_units, input_shape=input_shape))
model_2.add(Dropout(dropout))
model_2.add(Dense(output_units))
model_2.compile(optimizer='adam', loss='mean_squared_error')
model_2.summary()
Model: "sequential_4"
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
lstm_4 (LSTM)                (None, 100)               40800
dropout_4 (Dropout)          (None, 100)               0
dense_4 (Dense)              (None, 1)                 101
=================================================================
Total params: 40901 (159.77 KB)
Trainable params: 40901 (159.77 KB)
Non-trainable params: 0 (0.00 Byte)
_________________________________________________________________
history_2 = model_2.fit(X_train, y_train,
                        epochs=100,
                        batch_size=32,
                        validation_data=(X_test, y_test),
                        callbacks=[early_stopping_cb],
                        shuffle=False)
Epoch 1/100 874/874 [==============================] - 8s 7ms/step - loss: 0.0117 - val_loss: 0.0027
[... epochs 2-92 omitted for brevity; training loss fell steadily to ~5.8e-04 by epoch 88, then stopped improving, and the early stopping callback ended the run ...]
Epoch 93/100 874/874 [==============================] - 5s 6ms/step - loss: 5.9688e-04 - val_loss: 4.6066e-04
fig = px.line(history_2.history,
              labels={'value': 'Loss',
                      'index': 'Epoch'})
fig.update_layout(legend=dict(title='',
                              yanchor="top", y=0.99,
                              xanchor="right", x=0.99))
fig.show()
# Skip the first window_size sequences so the predictions line up with the
# 2 * window_size offset used when building the results index below.
train_preds_2 = model_2.predict(X_train[window_size:])
test_preds_2 = model_2.predict(X_test[window_size:])
871/871 [==============================] - 3s 3ms/step
213/213 [==============================] - 1s 3ms/step
preds_2 = np.concatenate((train_preds_2, test_preds_2), axis=0)
preds_2 = scaler.inverse_transform(preds_2)
preds_2 = preds_2.reshape(-1)
y_train_2 = scaler.inverse_transform(y_train)
y_test_2 = scaler.inverse_transform(y_test)
y_2 = np.concatenate((y_train_2[window_size:].reshape(-1), y_test_2[window_size:].reshape(-1)), axis=0)
# Build the time index for the combined predictions: each sequence consumes
# window_size leading points, and the first window_size predictions were
# skipped above, hence the 2 * window_size offset.
results_2_df = pd.DataFrame(np.concatenate([energy_df['time'][2 * window_size:train.shape[0]],
                                            energy_df['time'][train.shape[0] + 2 * window_size:]]))
results_2_df.columns = ['time']
results_2_df['preds'] = preds_2
results_2_df['y'] = y_2
fig = px.line(results_2_df, x='time', y=['preds', 'y'],
              labels={'value': 'Total Demand',
                      'time': 'time'})
fig.update_layout(legend=dict(title='',
                              yanchor="top", y=0.99,
                              xanchor="right", x=0.99))
fig.show()
fig = px.line(results_2_df[-300:], x='time', y=['preds', 'y'],
              labels={'value': 'Total Demand',
                      'time': 'time'})
fig.update_layout(legend=dict(title='',
                              yanchor="top", y=0.99,
                              xanchor="right", x=0.99))
fig.show()
print(f'MAPE: {mean_absolute_percentage_error(y_2, preds_2):.2%}')
print(f'RMSE: {(mean_squared_error(y_2, preds_2)**0.5):.0f}')  # sqrt of MSE, i.e. RMSE
MAPE: 1.07%
RMSE: 486
early_stopping_cb = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5)
window_size = 168 # 7 days
X_train, y_train = create_sequences(train, window_size)
X_test, y_test = create_sequences(test, window_size)
print(f'X_train shape: {X_train.shape}, y_train shape: {y_train.shape}')
print(f'X_test shape: {X_test.shape}, y_test shape: {y_test.shape}')
print(f'LSTM input shape: {X_train.shape[1], X_train.shape[2]}')
X_train shape: (27883, 168, 1), y_train shape: (27883, 1)
X_test shape: (6845, 168, 1), y_test shape: (6845, 1)
LSTM input shape: (168, 1)
input_shape = (X_train.shape[1], X_train.shape[2])
lstm_units = 100
dropout = 0.2
output_units = 1
model_2 = Sequential()
model_2.add(LSTM(lstm_units, input_shape=input_shape))
model_2.add(Dense(100))
model_2.add(Dropout(dropout))
model_2.add(Dense(output_units))
model_2.compile(optimizer='adam', loss='mean_squared_error')
model_2.summary()
Model: "sequential_5"
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
lstm_5 (LSTM)                (None, 100)               40800
dense_5 (Dense)              (None, 100)               10100
dropout_5 (Dropout)          (None, 100)               0
dense_6 (Dense)              (None, 1)                 101
=================================================================
Total params: 51001 (199.22 KB)
Trainable params: 51001 (199.22 KB)
Non-trainable params: 0 (0.00 Byte)
_________________________________________________________________
history_2 = model_2.fit(X_train, y_train,
                        epochs=100,
                        batch_size=32,
                        validation_data=(X_test, y_test),
                        callbacks=[early_stopping_cb],
                        shuffle=False)
Epoch 1/100 872/872 [==============================] - 9s 9ms/step - loss: 0.0061 - val_loss: 0.0017
[... epochs 2-99 omitted for brevity; losses decreased steadily and training ran the full 100 epochs without tripping early stopping ...]
Epoch 100/100 872/872 [==============================] - 7s 8ms/step - loss: 4.7009e-04 - val_loss: 4.2702e-04
fig = px.line(history_2.history,
              labels={'value': 'Loss',
                      'index': 'Epoch'})
fig.update_layout(legend=dict(title='',
                              yanchor="top", y=0.99,
                              xanchor="right", x=0.99))
fig.show()
train_preds_2 = model_2.predict(X_train[window_size:])
test_preds_2 = model_2.predict(X_test[window_size:])
867/867 [==============================] - 3s 3ms/step
209/209 [==============================] - 1s 3ms/step
preds_2 = np.concatenate((train_preds_2, test_preds_2), axis=0)
preds_2 = scaler.inverse_transform(preds_2)
preds_2 = preds_2.reshape(-1)
y_train_2 = scaler.inverse_transform(y_train)
y_test_2 = scaler.inverse_transform(y_test)
y_2 = np.concatenate((y_train_2[window_size:].reshape(-1), y_test_2[window_size:].reshape(-1)), axis=0)
results_2_df = pd.DataFrame(np.concatenate([energy_df['time'][2 * window_size:train.shape[0]],
                                            energy_df['time'][train.shape[0] + 2 * window_size:]]))
results_2_df.columns = ['time']
results_2_df['preds'] = preds_2
results_2_df['y'] = y_2
fig = px.line(results_2_df, x='time', y=['preds', 'y'],
              labels={'value': 'Total Demand',
                      'time': 'time'})
fig.update_layout(legend=dict(title='',
                              yanchor="top", y=0.99,
                              xanchor="right", x=0.99))
fig.show()
fig = px.line(results_2_df[-300:], x='time', y=['preds', 'y'],
              labels={'value': 'Total Demand',
                      'time': 'time'})
fig.update_layout(legend=dict(title='',
                              yanchor="top", y=0.99,
                              xanchor="right", x=0.99))
fig.show()
print(f'MAPE: {mean_absolute_percentage_error(y_2, preds_2):.2%}')
print(f'RMSE: {(mean_squared_error(y_2, preds_2)**0.5):.0f}')  # sqrt of MSE, i.e. RMSE
MAPE: 1.10%
RMSE: 479
early_stopping_cb = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5)
initial_learning_rate = 0.001
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(initial_learning_rate,
                                                             decay_steps=872,
                                                             decay_rate=0.98)
optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)
window_size = 168 # 7 days
X_train, y_train = create_sequences(train, window_size)
X_test, y_test = create_sequences(test, window_size)
print(f'X_train shape: {X_train.shape}, y_train shape: {y_train.shape}')
print(f'X_test shape: {X_test.shape}, y_test shape: {y_test.shape}')
print(f'LSTM input shape: {X_train.shape[1], X_train.shape[2]}')
X_train shape: (27883, 168, 1), y_train shape: (27883, 1)
X_test shape: (6845, 168, 1), y_test shape: (6845, 1)
LSTM input shape: (168, 1)
input_shape = (X_train.shape[1], X_train.shape[2])
lstm_units = 100
dropout = 0.2
output_units = 1
model_2 = Sequential()
model_2.add(LSTM(lstm_units, input_shape=input_shape))
model_2.add(Dense(100))
model_2.add(Dropout(dropout))
model_2.add(Dense(output_units))
model_2.compile(optimizer=optimizer, loss='mean_squared_error')
model_2.summary()
Model: "sequential_7"
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
lstm_7 (LSTM)                (None, 100)               40800
dense_9 (Dense)              (None, 100)               10100
dropout_7 (Dropout)          (None, 100)               0
dense_10 (Dense)             (None, 1)                 101
=================================================================
Total params: 51001 (199.22 KB)
Trainable params: 51001 (199.22 KB)
Non-trainable params: 0 (0.00 Byte)
_________________________________________________________________
history_2 = model_2.fit(X_train, y_train,
                        epochs=100,
                        batch_size=32,
                        validation_data=(X_test, y_test),
                        callbacks=[early_stopping_cb],
                        shuffle=False)
Epoch 1/100 872/872 [==============================] - 10s 10ms/step - loss: 0.0064 - val_loss: 0.0016
[... epochs 2-99 omitted for brevity; losses decreased smoothly through the full 100 epochs ...]
Epoch 100/100 872/872 [==============================] - 9s 10ms/step - loss: 4.9718e-04 - val_loss: 4.0953e-04
fig = px.line(history_2.history,
              labels={'value': 'Loss',
                      'index': 'Epoch'})
fig.update_layout(legend=dict(title='',
                              yanchor="top", y=0.99,
                              xanchor="right", x=0.99))
fig.show()
train_preds_2 = model_2.predict(X_train[window_size:])
test_preds_2 = model_2.predict(X_test[window_size:])
867/867 [==============================] - 4s 4ms/step
209/209 [==============================] - 1s 4ms/step
preds_2 = np.concatenate((train_preds_2, test_preds_2), axis=0)
preds_2 = scaler.inverse_transform(preds_2)
preds_2 = preds_2.reshape(-1)
y_train_2 = scaler.inverse_transform(y_train)
y_test_2 = scaler.inverse_transform(y_test)
y_2 = np.concatenate((y_train_2[window_size:].reshape(-1), y_test_2[window_size:].reshape(-1)), axis=0)
results_2_df = pd.DataFrame(np.concatenate([energy_df['time'][2 * window_size:train.shape[0]],
                                            energy_df['time'][train.shape[0] + 2 * window_size:]]))
results_2_df.columns = ['time']
results_2_df['preds'] = preds_2
results_2_df['y'] = y_2
fig = px.line(results_2_df, x='time', y=['preds', 'y'],
              labels={'value': 'Total Demand',
                      'time': 'time'})
fig.update_layout(legend=dict(title='',
                              yanchor="top", y=0.99,
                              xanchor="right", x=0.99))
fig.show()
fig = px.line(results_2_df[-300:], x='time', y=['preds', 'y'],
              labels={'value': 'Total Demand',
                      'time': 'time'})
fig.update_layout(legend=dict(title='',
                              yanchor="top", y=0.99,
                              xanchor="right", x=0.99))
fig.show()
print(f'MAPE: {mean_absolute_percentage_error(y_2, preds_2):.2%}')
print(f'RMSE: {(mean_squared_error(y_2, preds_2)**0.5):.0f}')  # sqrt of MSE, i.e. RMSE
MAPE: 1.03%
RMSE: 465
5. Results
Baseline
The first model is a simple 30-period moving average that I used to establish an easily beatable baseline. Looking at the zoomed-in plot of predictions vs. actuals, we can see that this model captures the macro trend fairly well but completely fails to capture the magnitude of the day-to-day and intra-day fluctuations in power demand. It would be useful for predicting trends in demand on a quarterly or yearly basis, but not for understanding changes from day to night or any other transients.
With a MAPE of 11.5%, we've got a long way to go to beat the target of 1.10%.
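For reference, the baseline can be computed in a couple of lines. A minimal sketch, where 'total_demand' is a hypothetical column name standing in for the notebook's actual demand column (the original baseline cell appears earlier in the notebook):

ma_window = 30
# Each period is predicted as the mean of the previous 30 periods; shift(1)
# keeps every forecast strictly out-of-sample.
ma_preds = energy_df['total_demand'].shift(1).rolling(ma_window).mean()
valid = ma_preds.notna()
print(f"MAPE: {mean_absolute_percentage_error(energy_df.loc[valid, 'total_demand'], ma_preds[valid]):.2%}")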
Prophet
The Prophet package gets mixed reviews in the data science community. Some people love it because it is very easy to use, while others dislike it precisely because that ease of use invites misinterpretation of its results. Regardless of your point of view, Prophet does perform well in certain scenarios, like this one, where the data has a strong cyclic pattern as well as macro trends. From the visualizations, we can see that Prophet has done a good job of capturing the macro trend and has produced a reasonable-looking forecast. Plotting Prophet's predictions against actuals and zooming in, we can see that it has captured the "double-hump" daily pattern and has the flexibility to predict the full magnitude of the daily fluctuations, but it simply misses the mark on certain days.
A MAPE of 7.02% is an improvement over the simple moving average, but nowhere near the target. Prophet can handle multivariate time series, which would give it a better shot, but to keep the comparison with the other models fair, we will keep this analysis univariate.
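For context, a univariate Prophet fit takes only a few lines. A minimal sketch, where 'total_demand' and train_size are assumptions standing in for the notebook's actual column name and split index:

from prophet import Prophet

prophet_df = energy_df.rename(columns={'time': 'ds', 'total_demand': 'y'})
m = Prophet()                             # seasonalities are auto-detected by default
m.fit(prophet_df.iloc[:train_size])       # fit on the training portion only
forecast = m.predict(prophet_df[['ds']])  # 'yhat' holds the point forecast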
LSTM 1
This first iteration of an LSTM model is very basic, just making sure the model building, training, and evaluation pipelines are all working properly. A key part of preparing data for an LSTM is creating the sequences it will learn from. In this iteration, we give it a lookback window of 1, which is essentially no lookback. We build the model with 10 LSTM units, a Dropout layer with rate 0.2, and a Dense layer with one output unit for the next-period prediction. We use the Adam optimizer and MSE as the loss function, then train for 10 epochs.
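The create_sequences helper is defined earlier in the notebook; a plausible reconstruction, consistent with the shapes printed in the training cells (inputs of shape (n, window_size, 1), targets of shape (n, 1)), looks like this:

def create_sequences(data, window_size):
    # Slide a window over the scaled (n, 1) array, pairing each window of
    # past values with the single value that immediately follows it.
    X, y = [], []
    for i in range(len(data) - window_size):
        X.append(data[i:i + window_size])  # lookback window
        y.append(data[i + window_size])    # next-period target
    return np.array(X), np.array(y)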
The plot of the losses looks good, with losses decreasing at first, then leveling out. Plotting predictions vs. actuals, we see that even this basic LSTM models the time series quite accurately. Interestingly, while the predictions are accurate, they seem to run one period behind. This may be due to the lack of lookback, or it may be that the model is not really predicting at all, merely echoing the previous observation at each step; the quick check below is one way to tell the difference.
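If re-aligning the predictions one step back scores better than the as-is alignment, the model is mostly echoing the previous observation. A sketch, where y_1 and preds_1 are hypothetical names for that model's actuals and predictions (the LSTM 1 cells are earlier in the notebook):

# If preds_1[t] is really just y_1[t-1], shifting should lower the error.
print(f'Aligned as-is:          {mean_absolute_percentage_error(y_1, preds_1):.2%}')
print(f'Preds shifted one step: {mean_absolute_percentage_error(y_1[:-1], preds_1[1:]):.2%}')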
MAPE is 4.02%, a significant improvement over the baseline and Prophet. Let's see how much we can improve.
LSTM 2
For the second model, I increased the lookback window to 100 time periods. This should give the LSTM the sequence data it needs to model the time series. I also increased the units in the LSTM layer to give it more parameters to handle the longer lookback. For the first model, the losses dropped nicely at the beginning of training, but the gains then tapered off. With the additional trainable parameters in this model, I think we will be able to train for longer without seeing the losses flatline, so I extended the training to 50 epochs. I also added an early stopping callback to prevent overfitting.
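One caveat worth noting: the callback in this notebook monitors the training loss. A common variant that guards against overfitting more directly is to monitor the validation loss and roll back to the best weights, along these lines (a sketch of the alternative, not what this notebook uses):

early_stopping_cb = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                     patience=5,
                                                     restore_best_weights=True)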
The loss curves look good. The telltale signs of overfitting (an initial drop in validation loss followed by a steady increase while the training loss continues to drop) are not present, indicating we're still in the productive part of the training curve. Additionally, we did not trip early stopping, which further supports the idea that there is more performance to squeeze out of this model.
Looking at the plot of predictions vs actuals, we can see very good agreement. The one-period offset we saw in the previous model is now gone. This is looking promising.
MAPE of 1.29% is a significant improvement over the basic LSTM score, but not yet at the level we're trying to beat.
LSTM 3
In this iteration, I extended the lookback window to 168 periods, which is 7 days. The thinking here is that giving the LSTM an entire week of history per sequence will help it learn week-to-week patterns in addition to the daily patterns it has already clearly picked up.
Since the last model did not trip the early stopping callback and the loss scores were still improving at 50 epochs, I extended training to 100 epochs. The early stopping callback is still active to protect us from overfitting, and this time it did trip, at epoch 93.
MAPE is now 1.07%, another meaningful improvement, and we're now beating the 1.10% MAPE of the forecast provided by the Spanish power utility, although our RMSE of 486 is slightly worse than their 455.
LSTM 4
My hunch at this point is that the model needs more parameters to perform better, so I added a 100-unit Dense layer right after the LSTM layer. The thinking is that, similar to a CNN architecture, the LSTM acts as a feature extractor while the fully connected layer acts as a pattern recognizer and regressor. With the additional trainable parameters, training went all the way to 100 epochs without tripping the early stopping callback.
Unfortunately, the MAPE got slightly worse at 1.10%.
LSTM 5
The last iteration showed some strange behavior in the loss scores: they would repeatedly settle down to a low level, then jump back up, only to settle back down a few epochs later. One theory is that the learning rate is too high and the model is jumping from one side of the gradient descent "valley" to the other without being able to settle into the optimal solution. Another is that it is popping out of a good minimum and into a worse local one. In either case, slowing down the learning rate seemed like a good idea. To do this in the later epochs without slowing convergence in the early epochs, I implemented an exponential decay learning rate schedule.
After a few attempts with different initial learning rates and decay rates, I settled on 0.001 and 0.98 respectively. This resulted in training going all the way to 100 epochs, but with a much smoother validation loss curve.
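Since decay_steps matches one epoch's worth of batches (872), the schedule multiplies the learning rate by 0.98 once per epoch, i.e. lr = 0.001 * 0.98 ** epoch. A quick check of the effective rate:

for epoch in [1, 25, 50, 100]:
    lr = initial_learning_rate * 0.98 ** epoch  # step = epoch * 872, decay_steps = 872
    print(f'epoch {epoch:3d}: lr ~ {lr:.6f}')
# By epoch 100 the rate is about 1.3e-4, roughly a 7.5x reduction from the start.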
It also delivered a MAPE of 1.03% and an RMSE of 465, easily beating the MAPE target and coming very close on RMSE.
6. Conclusion
Summary of results
| Model | Architecture | Lookback | LSTM units | Epochs | Learning Rate | MAPE | RMSE |
|---|---|---|---|---|---|---|---|
| Target | unknown | n/a | n/a | n/a | n/a | 1.10% | 455 |
| Baseline | Moving avg | 30 | n/a | n/a | n/a | 11.5% | 3828 |
| Prophet | Prophet | n/a | n/a | n/a | n/a | 7.02% | 2658 |
| LSTM 1 | LSTM | 1 | 10 | 10 | Fixed | 4.02% | 1432 |
| LSTM 2 | LSTM | 100 | 100 | 50 | Fixed | 1.29% | 551 |
| LSTM 3 | LSTM | 168 | 100 | 93 | Fixed | 1.07% | 486 |
| LSTM 4 | LSTM | 168 | 100 | 100 | Fixed | 1.10% | 479 |
| LSTM 5 | LSTM | 168 | 100 | 100 | Exponential Decay | 1.03% | 465 |
This was a very enjoyable project, and I learned a lot about LSTMs and time series forecasting. I was pleasantly surprised at how well even a basic LSTM performed, and it was quite rewarding to see that, despite the strong out-of-the-box performance, I was still able to make significant improvements to the MAPE and RMSE scores of my LSTM model.
To further evolve this model, I could try a more complicated architecture with additional LSTM layers, but I think we're approaching the limit of what we can do with this data and this model architecture. To improve significantly, I think we will have to bring in more data. I am very interested in multivariate time series analysis and would be excited to bring in weather data like temperature or cloud cover.
It is very satisfying when I can apply my skills as a data scientist to meaningful topics like energy production and consumption. We need more data-driven analysis of this topic to have any hope of transitioning to renewable energy sources and establishing a sustainable relationship with our planet. Thanks for reading.
7. References
- Kaggle: Hourly energy demand generation and weather. https://www.kaggle.com/datasets/nicholasjhana/energy-consumption-generation-prices-and-weather
- Facebook Prophet: https://facebook.github.io/prophet/
- Keras LSTM: https://keras.io/api/layers/recurrent_layers/lstm/